dexay committed
Commit 6db0306 · 1 Parent(s): 798fe4e

Update app.py

Files changed (1):
  1. app.py +17 -17
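In short, the single @st.cache-decorated load_models() helper, which returned the tokenizer and both models as a tuple, is split into three separately cached loaders (load_tokenizer, load_modelNER, load_modelRE) decorated with @st.cache(allow_output_mutation = True); the two Hugging Face pipelines are now built inside the cached loaders and fetched at their point of use instead of being rebuilt on every submit.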
app.py CHANGED
@@ -15,17 +15,21 @@ st.write("This tool lets you extract relation triples concerning interactions be
 st.write("It is the result of an end of studies project within ESI school and dedicated to biomedical researchers looking to extract precise information about the subject without digging into long publications.")
 
 
-@st.cache
-def load_models():
-  tokenizer = AutoTokenizer.from_pretrained("dmis-lab/biobert-large-cased-v1.1", truncation = True, padding=True, model_max_length=512,)
+@st.cache(allow_output_mutation = True)
+def load_tokenizer():
+  return AutoTokenizer.from_pretrained("dmis-lab/biobert-large-cased-v1.1", truncation = True, padding=True, model_max_length=512,)
+
+tokenizer = load_tokenizer()
+
+@st.cache(allow_output_mutation = True)
+def load_modelNER(tokenizer):
   model_checkpoint = BertForTokenClassification.from_pretrained("dexay/Ner2HgF", )
-
+  return pipeline("token-classification", tokenizer = tokenizer,model=model_checkpoint, )
+@st.cache(allow_output_mutation = True)
+def load_modelRE(tokenizer):
   model_re = AutoModelForSequenceClassification.from_pretrained("dexay/reDs3others", )
+  return pipeline("text-classification", tokenizer = tokenizer,model=model_re, )
 
-  return tokenizer , model_checkpoint , model_re
-
-
-
 form = st.form(key='my-form')
 x = form.text_area('Enter text', height=250)
 submit = form.form_submit_button('Submit')
@@ -38,14 +42,10 @@ if submit and len(x) != 0:
   #model.to("cpu")
   st.text("Execution is in progress ...")
 
-  tmr = load_models()
-
-  tokenizer = tmr[0]
-  model_checkpoint = tmr[1]
-  model_re = tmr[2]
+
 
 
-  token_classifier = pipeline("token-classification", tokenizer = tokenizer,model=model_checkpoint, )
+  #token_classifier = pipeline("token-classification", tokenizer = tokenizer,model=model_checkpoint, )
 
 
 
@@ -74,6 +74,7 @@ if submit and len(x) != 0:
 
   #tokenized_dat = tokenize_function(ddata)
 
+  token_classifier = load_modelNER(tokenizer)
   az = token_classifier(ddata)
 
 
@@ -189,13 +190,12 @@ if submit and len(x) != 0:
 
   # Relation extraction part
 
-  token_classifier = pipeline("text-classification", tokenizer = tokenizer,model=model_re,
-  )
+  #token_classifier = pipeline("text-classification", tokenizer = tokenizer,model=model_re, )
 
   rrdata = lstSentEnc
 
 
-
+  token_classifier = load_modelRE(tokenizer)
   outre = token_classifier(rrdata)
 
 
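Below is a minimal sketch of the caching pattern this commit adopts: each heavy resource lives behind its own @st.cache(allow_output_mutation = True) loader, so Streamlit reruns reuse the loaded objects instead of rebuilding them, and allow_output_mutation tells Streamlit to skip hashing the returned (mutable) model objects. The loader names and the sample input are illustrative, not from the repo; unlike the commit, each loader here builds its own tokenizer rather than taking it as an argument, which spares Streamlit from hashing the tokenizer as part of the cache key (the duplicated tokenizer load only happens on the first run). On current Streamlit releases, where st.cache is deprecated, st.cache_resource plays the same role.

import streamlit as st
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BertForTokenClassification,
    pipeline,
)

def build_tokenizer():
    # model_max_length keeps encodings within BioBERT's 512-token limit
    return AutoTokenizer.from_pretrained(
        "dmis-lab/biobert-large-cased-v1.1", model_max_length=512
    )

@st.cache(allow_output_mutation=True)
def load_ner_pipeline():
    # token-classification (NER) model from the commit
    model = BertForTokenClassification.from_pretrained("dexay/Ner2HgF")
    return pipeline("token-classification", model=model, tokenizer=build_tokenizer())

@st.cache(allow_output_mutation=True)
def load_re_pipeline():
    # relation-classification model from the commit, sharing the same tokenizer
    model = AutoModelForSequenceClassification.from_pretrained("dexay/reDs3others")
    return pipeline("text-classification", model=model, tokenizer=build_tokenizer())

ner = load_ner_pipeline()    # built once per session, reused on every rerun
re_clf = load_re_pipeline()
st.write(ner("Estradiol regulates the expression of ESR1."))  # illustrative input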